TheAlgorithms/Python
Commit a2fa32c

Authored by cclauss, lukazlim, and pre-commit-ci[bot] on May 14, 2025

Lukazlim: Replace dependency requests with httpx (#12744)

* Replace dependency `requests` with `httpx`

  Fixes #12742

  Signed-off-by: Lim, Lukaz Wei Hwang <lukaz.wei.hwang.lim@intel.com>

* updating DIRECTORY.md

* [pre-commit.ci] auto fixes from pre-commit.com hooks

  for more information, see https://pre-commit.ci

---------

Signed-off-by: Lim, Lukaz Wei Hwang <lukaz.wei.hwang.lim@intel.com>
Co-authored-by: Lim, Lukaz Wei Hwang <lukaz.wei.hwang.lim@intel.com>
Co-authored-by: cclauss <cclauss@users.noreply.github.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
1 parent 6e4d1b3 commit a2fa32c
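Every call site in this commit follows the same pattern: `requests.get`/`requests.post` becomes `httpx.get`/`httpx.post`, the explicit `timeout` is kept, `follow_redirects=True` is added where the old behaviour relied on redirects being followed (httpx, unlike requests, does not follow redirects by default), and `requests.exceptions.RequestException` is mapped to `httpx.RequestError`. A minimal sketch of the pattern, using an illustrative URL rather than one taken from the commit:

    import httpx

    EXAMPLE_URL = "https://api.github.com"  # illustrative endpoint, not from the commit

    def fetch_json(url: str = EXAMPLE_URL) -> dict:
        # Same call shape as requests, but redirects must be enabled explicitly.
        response = httpx.get(url, timeout=10, follow_redirects=True)
        response.raise_for_status()  # raises httpx.HTTPStatusError on 4xx/5xx
        return response.json()

    try:
        print(fetch_json())
    except httpx.RequestError as err:  # network-level failures (DNS, timeouts, ...)
        print(f"Request failed: {err!r}")
    except httpx.HTTPStatusError as err:  # non-2xx responses from raise_for_status()
        print(f"Bad status: {err.response.status_code}")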

40 files changed: +971, -654 lines
 

‎DIRECTORY.md

Lines changed: 1 addition & 0 deletions

@@ -899,6 +899,7 @@
   * [N Body Simulation](physics/n_body_simulation.py)
   * [Newtons Law Of Gravitation](physics/newtons_law_of_gravitation.py)
   * [Newtons Second Law Of Motion](physics/newtons_second_law_of_motion.py)
+  * [Orbital Transfer Work](physics/orbital_transfer_work.py)
   * [Period Of Pendulum](physics/period_of_pendulum.py)
   * [Photoelectric Effect](physics/photoelectric_effect.py)
   * [Potential Energy](physics/potential_energy.py)

‎machine_learning/linear_regression.py

Lines changed: 10 additions & 2 deletions

@@ -8,16 +8,24 @@
 Rating). We try to best fit a line through dataset and estimate the parameters.
 """
 
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "httpx",
+#     "numpy",
+# ]
+# ///
+
+import httpx
 import numpy as np
-import requests
 
 
 def collect_dataset():
     """Collect dataset of CSGO
     The dataset contains ADR vs Rating of a Player
     :return : dataset obtained from the link, as matrix
     """
-    response = requests.get(
+    response = httpx.get(
         "https://raw.githubusercontent.com/yashLadha/The_Math_of_Intelligence/"
         "master/Week1/ADRvsRating.csv",
         timeout=10,
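The comment block added at the top of this script, and of each script below, is PEP 723 inline script metadata: tools that understand the format (for example `uv run`) can resolve the listed dependencies before executing the file on its own. The general shape, with the dependency list varying per script:

    # /// script
    # requires-python = ">=3.13"
    # dependencies = [
    #     "httpx",  # plus whatever else the script imports
    # ]
    # ///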

‎physics/speeds_of_gas_molecules.py

Lines changed: 4 additions & 4 deletions

@@ -59,9 +59,9 @@ def avg_speed_of_molecule(temperature: float, molar_mass: float) -> float:
     Examples:
 
     >>> avg_speed_of_molecule(273, 0.028)  # nitrogen at 273 K
-    454.3488755020387
+    454.3488755062257
     >>> avg_speed_of_molecule(300, 0.032)  # oxygen at 300 K
-    445.52572733919885
+    445.5257273433045
     >>> avg_speed_of_molecule(-273, 0.028)  # invalid temperature
     Traceback (most recent call last):
     ...
@@ -87,9 +87,9 @@ def mps_speed_of_molecule(temperature: float, molar_mass: float) -> float:
     Examples:
 
     >>> mps_speed_of_molecule(273, 0.028)  # nitrogen at 273 K
-    402.65620701908966
+    402.65620702280023
     >>> mps_speed_of_molecule(300, 0.032)  # oxygen at 300 K
-    394.836895549922
+    394.8368955535605
     >>> mps_speed_of_molecule(-273, 0.028)  # invalid temperature
     Traceback (most recent call last):
     ...

‎pyproject.toml

Lines changed: 2 additions & 2 deletions

@@ -11,6 +11,7 @@ classifiers = [
 dependencies = [
   "beautifulsoup4>=4.12.3",
   "fake-useragent>=1.5.1",
+  "httpx>=0.28.1",
   "imageio>=2.36.1",
   "keras>=3.7",
   "lxml>=5.3",
@@ -19,7 +20,6 @@ dependencies = [
   "opencv-python>=4.10.0.84",
   "pandas>=2.2.3",
   "pillow>=11",
-  "requests>=2.32.3",
   "rich>=13.9.4",
   "scikit-learn>=1.5.2",
   "sphinx-pyproject>=0.3",
@@ -42,8 +42,8 @@ docs = [
   "sphinx-pyproject>=0.3",
 ]
 euler-validate = [
+  "httpx>=0.28.1",
   "numpy>=2.1.3",
-  "requests>=2.32.3",
 ]
 
 [tool.ruff]

‎requirements.txt

Lines changed: 1 addition & 1 deletion

@@ -1,5 +1,6 @@
 beautifulsoup4
 fake-useragent
+httpx
 imageio
 keras
 lxml
@@ -8,7 +9,6 @@ numpy
 opencv-python
 pandas
 pillow
-requests
 rich
 scikit-learn
 sphinx-pyproject

‎scripts/validate_solutions.py

Lines changed: 3 additions & 3 deletions

@@ -3,8 +3,8 @@
 # /// script
 # requires-python = ">=3.13"
 # dependencies = [
+#     "httpx",
 #     "pytest",
-#     "requests",
 # ]
 # ///
 
@@ -15,8 +15,8 @@
 import pathlib
 from types import ModuleType
 
+import httpx
 import pytest
-import requests
 
 PROJECT_EULER_DIR_PATH = pathlib.Path.cwd().joinpath("project_euler")
 PROJECT_EULER_ANSWERS_PATH = pathlib.Path.cwd().joinpath(
@@ -66,7 +66,7 @@ def added_solution_file_path() -> list[pathlib.Path]:
         "Accept": "application/vnd.github.v3+json",
         "Authorization": "token " + os.environ["GITHUB_TOKEN"],
     }
-    files = requests.get(get_files_url(), headers=headers, timeout=10).json()
+    files = httpx.get(get_files_url(), headers=headers, timeout=10).json()
     for file in files:
         filepath = pathlib.Path.cwd().joinpath(file["filename"])
         if (

‎uv.lock

Lines changed: 582 additions & 526 deletions

(Generated lock file; diff not rendered.)

‎web_programming/co2_emission.py

Lines changed: 10 additions & 3 deletions

@@ -2,22 +2,29 @@
 Get CO2 emission data from the UK CarbonIntensity API
 """
 
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "httpx",
+# ]
+# ///
+
 from datetime import date
 
-import requests
+import httpx
 
 BASE_URL = "https://api.carbonintensity.org.uk/intensity"
 
 
 # Emission in the last half hour
 def fetch_last_half_hour() -> str:
-    last_half_hour = requests.get(BASE_URL, timeout=10).json()["data"][0]
+    last_half_hour = httpx.get(BASE_URL, timeout=10).json()["data"][0]
     return last_half_hour["intensity"]["actual"]
 
 
 # Emissions in a specific date range
 def fetch_from_to(start, end) -> list:
-    return requests.get(f"{BASE_URL}/{start}/{end}", timeout=10).json()["data"]
+    return httpx.get(f"{BASE_URL}/{start}/{end}", timeout=10).json()["data"]
 
 
 if __name__ == "__main__":

‎web_programming/covid_stats_via_xpath.py

Lines changed: 10 additions & 2 deletions

@@ -4,9 +4,17 @@
 more convenient to use in Python web projects (e.g. Django or Flask-based)
 """
 
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "httpx",
+#     "lxml",
+# ]
+# ///
+
 from typing import NamedTuple
 
-import requests
+import httpx
 from lxml import html
 
 
@@ -19,7 +27,7 @@ class CovidData(NamedTuple):
 def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> CovidData:
     xpath_str = '//div[@class = "maincounter-number"]/span/text()'
     return CovidData(
-        *html.fromstring(requests.get(url, timeout=10).content).xpath(xpath_str)
+        *html.fromstring(httpx.get(url, timeout=10).content).xpath(xpath_str)
     )

‎web_programming/crawl_google_results.py

Lines changed: 16 additions & 2 deletions

@@ -1,14 +1,28 @@
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "beautifulsoup4",
+#     "fake-useragent",
+#     "httpx",
+# ]
+# ///
+
 import sys
 import webbrowser
 
-import requests
+import httpx
 from bs4 import BeautifulSoup
 from fake_useragent import UserAgent
 
 if __name__ == "__main__":
     print("Googling.....")
     url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
-    res = requests.get(url, headers={"UserAgent": UserAgent().random}, timeout=10)
+    res = httpx.get(
+        url,
+        headers={"UserAgent": UserAgent().random},
+        timeout=10,
+        follow_redirects=True,
+    )
     # res.raise_for_status()
     with open("project1a.html", "wb") as out_file:  # only for knowing the class
         for data in res.iter_content(10000):
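Note that the unchanged context line above still calls requests' `res.iter_content(10000)`; httpx responses expose `iter_bytes()` instead. A hedged sketch of what the chunked write looks like with the httpx API, reusing the `res` object from this script (this is not a change made by the commit):

    # httpx.Response has no iter_content(); iter_bytes() is the chunked equivalent.
    with open("project1a.html", "wb") as out_file:
        for chunk in res.iter_bytes(chunk_size=10000):
            out_file.write(chunk)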

‎web_programming/crawl_google_scholar_citation.py

Lines changed: 10 additions & 2 deletions

@@ -3,7 +3,15 @@
 using title and year of publication, and volume and pages of journal.
 """
 
-import requests
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "beautifulsoup4",
+#     "httpx",
+# ]
+# ///
+
+import httpx
 from bs4 import BeautifulSoup
 
 
@@ -12,7 +20,7 @@ def get_citation(base_url: str, params: dict) -> str:
     Return the citation number.
     """
     soup = BeautifulSoup(
-        requests.get(base_url, params=params, timeout=10).content, "html.parser"
+        httpx.get(base_url, params=params, timeout=10).content, "html.parser"
     )
     div = soup.find("div", attrs={"class": "gs_ri"})
     anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")

‎web_programming/currency_converter.py

Lines changed: 9 additions & 2 deletions

@@ -3,9 +3,16 @@
 https://www.amdoren.com
 """
 
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "httpx",
+# ]
+# ///
+
 import os
 
-import requests
+import httpx
 
 URL_BASE = "https://www.amdoren.com/api/currency.php"
 
@@ -176,7 +183,7 @@ def convert_currency(
     params = locals()
     # from is a reserved keyword
     params["from"] = params.pop("from_")
-    res = requests.get(URL_BASE, params=params, timeout=10).json()
+    res = httpx.get(URL_BASE, params=params, timeout=10).json()
     return str(res["amount"]) if res["error"] == 0 else res["error_message"]

‎web_programming/current_stock_price.py

Lines changed: 11 additions & 3 deletions

@@ -1,4 +1,12 @@
-import requests
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "beautifulsoup4",
+#     "httpx",
+# ]
+# ///
+
+import httpx
 from bs4 import BeautifulSoup
 
 """
@@ -20,8 +28,8 @@ def stock_price(symbol: str = "AAPL") -> str:
     True
     """
     url = f"https://finance.yahoo.com/quote/{symbol}?p={symbol}"
-    yahoo_finance_source = requests.get(
-        url, headers={"USER-AGENT": "Mozilla/5.0"}, timeout=10
+    yahoo_finance_source = httpx.get(
+        url, headers={"USER-AGENT": "Mozilla/5.0"}, timeout=10, follow_redirects=True
     ).text
     soup = BeautifulSoup(yahoo_finance_source, "html.parser")

‎web_programming/current_weather.py

Lines changed: 10 additions & 3 deletions

@@ -1,4 +1,11 @@
-import requests
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "httpx",
+# ]
+# ///
+
+import httpx
 
 # Put your API key(s) here
 OPENWEATHERMAP_API_KEY = ""
@@ -19,13 +26,13 @@ def current_weather(location: str) -> list[dict]:
     weather_data = []
     if OPENWEATHERMAP_API_KEY:
         params_openweathermap = {"q": location, "appid": OPENWEATHERMAP_API_KEY}
-        response_openweathermap = requests.get(
+        response_openweathermap = httpx.get(
             OPENWEATHERMAP_URL_BASE, params=params_openweathermap, timeout=10
         )
         weather_data.append({"OpenWeatherMap": response_openweathermap.json()})
     if WEATHERSTACK_API_KEY:
         params_weatherstack = {"query": location, "access_key": WEATHERSTACK_API_KEY}
-        response_weatherstack = requests.get(
+        response_weatherstack = httpx.get(
             WEATHERSTACK_URL_BASE, params=params_weatherstack, timeout=10
         )
         weather_data.append({"Weatherstack": response_weatherstack.json()})

‎web_programming/daily_horoscope.py

Lines changed: 10 additions & 2 deletions

@@ -1,4 +1,12 @@
-import requests
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "beautifulsoup4",
+#     "httpx",
+# ]
+# ///
+
+import httpx
 from bs4 import BeautifulSoup
 
 
@@ -7,7 +15,7 @@ def horoscope(zodiac_sign: int, day: str) -> str:
         "https://www.horoscope.com/us/horoscopes/general/"
         f"horoscope-general-daily-{day}.aspx?sign={zodiac_sign}"
     )
-    soup = BeautifulSoup(requests.get(url, timeout=10).content, "html.parser")
+    soup = BeautifulSoup(httpx.get(url, timeout=10).content, "html.parser")
     return soup.find("div", class_="main-horoscope").p.text

‎web_programming/download_images_from_google_query.py

Lines changed: 10 additions & 2 deletions

@@ -1,10 +1,18 @@
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "beautifulsoup4",
+#     "httpx",
+# ]
+# ///
+
 import json
 import os
 import re
 import sys
 import urllib.request
 
-import requests
+import httpx
 from bs4 import BeautifulSoup
 
 headers = {
@@ -39,7 +47,7 @@ def download_images_from_google_query(query: str = "dhaka", max_images: int = 5)
         "ijn": "0",
     }
 
-    html = requests.get(
+    html = httpx.get(
         "https://www.google.com/search", params=params, headers=headers, timeout=10
     )
     soup = BeautifulSoup(html.text, "html.parser")

‎web_programming/emails_from_url.py

Lines changed: 17 additions & 4 deletions

@@ -1,5 +1,12 @@
 """Get the site emails from URL."""
 
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "httpx",
+# ]
+# ///
+
 from __future__ import annotations
 
 __author__ = "Muhammad Umer Farooq"
@@ -13,7 +20,7 @@
 from html.parser import HTMLParser
 from urllib import parse
 
-import requests
+import httpx
 
 
 class Parser(HTMLParser):
@@ -72,7 +79,7 @@ def emails_from_url(url: str = "https://github.com") -> list[str]:
 
     try:
         # Open URL
-        r = requests.get(url, timeout=10)
+        r = httpx.get(url, timeout=10, follow_redirects=True)
 
         # pass the raw HTML to the parser to get links
         parser.feed(r.text)
@@ -81,9 +88,15 @@ def emails_from_url(url: str = "https://github.com") -> list[str]:
         valid_emails = set()
         for link in parser.urls:
             # open URL.
-            # read = requests.get(link)
+            # Check if the link is already absolute
+            if not link.startswith("http://") and not link.startswith("https://"):
+                # Prepend protocol only if link starts with domain, normalize otherwise
+                if link.startswith(domain):
+                    link = f"https://{link}"
+                else:
+                    link = parse.urljoin(f"https://{domain}", link)
             try:
-                read = requests.get(link, timeout=10)
+                read = httpx.get(link, timeout=10, follow_redirects=True)
                 # Get the valid email.
                 emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                 # If not in list then append it.
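The new branch normalizes relative links before they are fetched. The same logic as a standalone sketch, with illustrative example links:

    from urllib import parse

    domain = "github.com"  # illustrative domain, matching the script's default URL

    def normalize(link: str) -> str:
        # Absolute URLs are left untouched.
        if link.startswith(("http://", "https://")):
            return link
        # Links that already start with the domain only need a scheme prepended.
        if link.startswith(domain):
            return f"https://{link}"
        # Everything else is resolved relative to the domain root.
        return parse.urljoin(f"https://{domain}", link)

    print(normalize("https://docs.github.com"))  # unchanged
    print(normalize("github.com/about"))         # -> https://github.com/about
    print(normalize("/pricing"))                 # -> https://github.com/pricing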

‎web_programming/fetch_anime_and_play.py

Lines changed: 15 additions & 6 deletions

@@ -1,8 +1,17 @@
-import requests
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "beautifulsoup4",
+#     "fake-useragent",
+#     "httpx",
+# ]
+# ///
+
+import httpx
 from bs4 import BeautifulSoup, NavigableString, Tag
 from fake_useragent import UserAgent
 
-BASE_URL = "https://ww1.gogoanime2.org"
+BASE_URL = "https://ww7.gogoanime2.org"
 
 
 def search_scraper(anime_name: str) -> list:
@@ -25,9 +34,9 @@ def search_scraper(anime_name: str) -> list:
     """
 
     # concat the name to form the search url.
-    search_url = f"{BASE_URL}/search/{anime_name}"
+    search_url = f"{BASE_URL}/search?keyword={anime_name}"
 
-    response = requests.get(
+    response = httpx.get(
         search_url, headers={"UserAgent": UserAgent().chrome}, timeout=10
     )  # request the url.
 
@@ -82,7 +91,7 @@ def search_anime_episode_list(episode_endpoint: str) -> list:
 
     request_url = f"{BASE_URL}{episode_endpoint}"
 
-    response = requests.get(
+    response = httpx.get(
         url=request_url, headers={"UserAgent": UserAgent().chrome}, timeout=10
     )
     response.raise_for_status()
@@ -133,7 +142,7 @@ def get_anime_episode(episode_endpoint: str) -> list:
 
     episode_page_url = f"{BASE_URL}{episode_endpoint}"
 
-    response = requests.get(
+    response = httpx.get(
         url=episode_page_url, headers={"User-Agent": UserAgent().chrome}, timeout=10
     )
     response.raise_for_status()

‎web_programming/fetch_bbc_news.py

Lines changed: 9 additions & 2 deletions

@@ -1,13 +1,20 @@
 # Created by sarathkaul on 12/11/19
 
-import requests
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "httpx",
+# ]
+# ///
+
+import httpx
 
 _NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="
 
 
 def fetch_bbc_news(bbc_news_api_key: str) -> None:
     # fetching a list of articles in json format
-    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key, timeout=10).json()
+    bbc_news_page = httpx.get(_NEWS_API + bbc_news_api_key, timeout=10).json()
     # each article in the list is a dict
     for i, article in enumerate(bbc_news_page["articles"], 1):
         print(f"{i}.) {article['title']}")

‎web_programming/fetch_github_info.py

Lines changed: 10 additions & 3 deletions

@@ -18,12 +18,19 @@
 export USER_TOKEN=""
 """
 
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "httpx",
+# ]
+# ///
+
 from __future__ import annotations
 
 import os
 from typing import Any
 
-import requests
+import httpx
 
 BASE_URL = "https://api.github.com"
 
@@ -36,13 +43,13 @@
 
 def fetch_github_info(auth_token: str) -> dict[Any, Any]:
     """
-    Fetch GitHub info of a user using the requests module
+    Fetch GitHub info of a user using the httpx module
     """
     headers = {
         "Authorization": f"token {auth_token}",
         "Accept": "application/vnd.github.v3+json",
     }
-    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers, timeout=10).json()
+    return httpx.get(AUTHENTICATED_USER_ENDPOINT, headers=headers, timeout=10).json()
 
 
 if __name__ == "__main__":  # pragma: no cover

‎web_programming/fetch_jobs.py

Lines changed: 10 additions & 4 deletions

@@ -2,20 +2,26 @@
 Scraping jobs given job title and location from indeed website
 """
 
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "beautifulsoup4",
+#     "httpx",
+# ]
+# ///
+
 from __future__ import annotations
 
 from collections.abc import Generator
 
-import requests
+import httpx
 from bs4 import BeautifulSoup
 
 url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="
 
 
 def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str]]:
-    soup = BeautifulSoup(
-        requests.get(url + location, timeout=10).content, "html.parser"
-    )
+    soup = BeautifulSoup(httpx.get(url + location, timeout=10).content, "html.parser")
     # This attribute finds out all the specifics listed in a job
     for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
         job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()

‎web_programming/fetch_quotes.py

Lines changed: 10 additions & 3 deletions

@@ -6,19 +6,26 @@
 https://zenquotes.io/
 """
 
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "httpx",
+# ]
+# ///
+
 import pprint
 
-import requests
+import httpx
 
 API_ENDPOINT_URL = "https://zenquotes.io/api"
 
 
 def quote_of_the_day() -> list:
-    return requests.get(API_ENDPOINT_URL + "/today", timeout=10).json()
+    return httpx.get(API_ENDPOINT_URL + "/today", timeout=10).json()
 
 
 def random_quotes() -> list:
-    return requests.get(API_ENDPOINT_URL + "/random", timeout=10).json()
+    return httpx.get(API_ENDPOINT_URL + "/random", timeout=10).json()
 
 
 if __name__ == "__main__":

‎web_programming/get_amazon_product_data.py

Lines changed: 11 additions & 2 deletions

@@ -4,9 +4,18 @@
 information will include title, URL, price, ratings, and the discount available.
 """
 
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "beautifulsoup4",
+#     "httpx",
+#     "pandas",
+# ]
+# ///
+
 from itertools import zip_longest
 
-import requests
+import httpx
 from bs4 import BeautifulSoup
 from pandas import DataFrame
 
@@ -25,7 +34,7 @@ def get_amazon_product_data(product: str = "laptop") -> DataFrame:
         "Accept-Language": "en-US, en;q=0.5",
     }
     soup = BeautifulSoup(
-        requests.get(url, headers=header, timeout=10).text, features="lxml"
+        httpx.get(url, headers=header, timeout=10).text, features="lxml"
     )
     # Initialize a Pandas dataframe with the column titles
     data_frame = DataFrame(

‎web_programming/get_imdb_top_250_movies_csv.py

Lines changed: 12 additions & 4 deletions

@@ -1,16 +1,24 @@
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "beautifulsoup4",
+#     "httpx",
+# ]
+# ///
+
 from __future__ import annotations
 
 import csv
 
-import requests
+import httpx
 from bs4 import BeautifulSoup
 
 
 def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
     url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
-    soup = BeautifulSoup(requests.get(url, timeout=10).text, "html.parser")
-    titles = soup.find_all("td", attrs="titleColumn")
-    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
+    soup = BeautifulSoup(httpx.get(url, timeout=10).text, "html.parser")
+    titles = soup.find_all("h3", class_="ipc-title__text")
+    ratings = soup.find_all("span", class_="ipc-rating-star--rating")
     return {
         title.a.text: float(rating.strong.text)
         for title, rating in zip(titles, ratings)

‎web_programming/get_ip_geolocation.py

Lines changed: 10 additions & 3 deletions

@@ -1,4 +1,11 @@
-import requests
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "httpx",
+# ]
+# ///
+
+import httpx
 
 
 # Function to get geolocation data for an IP address
@@ -8,7 +15,7 @@ def get_ip_geolocation(ip_address: str) -> str:
         url = f"https://ipinfo.io/{ip_address}/json"
 
         # Send a GET request to the API
-        response = requests.get(url, timeout=10)
+        response = httpx.get(url, timeout=10)
 
         # Check if the HTTP request was successful
         response.raise_for_status()
@@ -23,7 +30,7 @@ def get_ip_geolocation(ip_address: str) -> str:
             location = "Location data not found."
 
         return location
-    except requests.exceptions.RequestException as e:
+    except httpx.RequestError as e:
        # Handle network-related exceptions
        return f"Request error: {e}"
    except ValueError as e:

‎web_programming/get_top_billionaires.py

Lines changed: 10 additions & 2 deletions

@@ -3,9 +3,17 @@
 This works for some of us but fails for others.
 """
 
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "httpx",
+#     "rich",
+# ]
+# ///
+
 from datetime import UTC, date, datetime
 
-import requests
+import httpx
 from rich import box
 from rich import console as rich_console
 from rich import table as rich_table
@@ -57,7 +65,7 @@ def get_forbes_real_time_billionaires() -> list[dict[str, int | str]]:
     Returns:
         List of top 10 realtime billionaires data.
     """
-    response_json = requests.get(API_URL, timeout=10).json()
+    response_json = httpx.get(API_URL, timeout=10).json()
     return [
         {
             "Name": person["personName"],

‎web_programming/get_top_hn_posts.py

Lines changed: 10 additions & 3 deletions

@@ -1,19 +1,26 @@
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "httpx",
+# ]
+# ///
+
 from __future__ import annotations
 
-import requests
+import httpx
 
 
 def get_hackernews_story(story_id: str) -> dict:
     url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
-    return requests.get(url, timeout=10).json()
+    return httpx.get(url, timeout=10).json()
 
 
 def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
     """
     Get the top max_stories posts from HackerNews - https://news.ycombinator.com/
     """
     url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
-    story_ids = requests.get(url, timeout=10).json()[:max_stories]
+    story_ids = httpx.get(url, timeout=10).json()[:max_stories]
     return [get_hackernews_story(story_id) for story_id in story_ids]

‎web_programming/giphy.py

Lines changed: 10 additions & 2 deletions

@@ -1,5 +1,13 @@
 #!/usr/bin/env python3
-import requests
+
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "httpx",
+# ]
+# ///
+
+import httpx
 
 giphy_api_key = "YOUR API KEY"
 # Can be fetched from https://developers.giphy.com/dashboard/
@@ -11,7 +19,7 @@ def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
     """
     formatted_query = "+".join(query.split())
     url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
-    gifs = requests.get(url, timeout=10).json()["data"]
+    gifs = httpx.get(url, timeout=10).json()["data"]
     return [gif["url"] for gif in gifs]

‎web_programming/instagram_crawler.py

Lines changed: 12 additions & 2 deletions

@@ -1,9 +1,19 @@
 #!/usr/bin/env python3
+
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "beautifulsoup4",
+#     "fake-useragent",
+#     "httpx",
+# ]
+# ///
+
 from __future__ import annotations
 
 import json
 
-import requests
+import httpx
 from bs4 import BeautifulSoup
 from fake_useragent import UserAgent
 
@@ -39,7 +49,7 @@ def get_json(self) -> dict:
         """
         Return a dict of user information
         """
-        html = requests.get(self.url, headers=headers, timeout=10).text
+        html = httpx.get(self.url, headers=headers, timeout=10).text
         scripts = BeautifulSoup(html, "html.parser").find_all("script")
         try:
             return extract_user_profile(scripts[4])

‎web_programming/instagram_pic.py

Lines changed: 14 additions & 6 deletions

@@ -1,6 +1,14 @@
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "beautifulsoup4",
+#     "httpx",
+# ]
+# ///
+
 from datetime import UTC, datetime
 
-import requests
+import httpx
 from bs4 import BeautifulSoup
 
 
@@ -15,9 +23,9 @@ def download_image(url: str) -> str:
         A message indicating the result of the operation.
     """
     try:
-        response = requests.get(url, timeout=10)
+        response = httpx.get(url, timeout=10)
         response.raise_for_status()
-    except requests.exceptions.RequestException as e:
+    except httpx.RequestError as e:
         return f"An error occurred during the HTTP request to {url}: {e!r}"
 
     soup = BeautifulSoup(response.text, "html.parser")
@@ -30,13 +38,13 @@ def download_image(url: str) -> str:
         return f"Image URL not found in meta tag {image_meta_tag}."
 
     try:
-        image_data = requests.get(image_url, timeout=10).content
-    except requests.exceptions.RequestException as e:
+        image_data = httpx.get(image_url, timeout=10).content
+    except httpx.RequestError as e:
         return f"An error occurred during the HTTP request to {image_url}: {e!r}"
     if not image_data:
         return f"Failed to download the image from {image_url}."
 
-    file_name = f"{datetime.now(tz=UTC).astimezone():%Y-%m-%d_%H:%M:%S}.jpg"
+    file_name = f"{datetime.now(tz=UTC).astimezone():%Y-%m-%d_%H-%M-%S}.jpg"
     with open(file_name, "wb") as out_file:
         out_file.write(image_data)
     return f"Image downloaded and saved in the file {file_name}"

‎web_programming/instagram_video.py

Lines changed: 11 additions & 4 deletions

@@ -1,17 +1,24 @@
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "httpx",
+# ]
+# ///
+
 from datetime import UTC, datetime
 
-import requests
+import httpx
 
 
 def download_video(url: str) -> bytes:
     base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
-    video_url = requests.get(base_url + url, timeout=10).json()[0]["urls"][0]["src"]
-    return requests.get(video_url, timeout=10).content
+    video_url = httpx.get(base_url + url, timeout=10)
+    return httpx.get(video_url, timeout=10).content
 
 
 if __name__ == "__main__":
     url = input("Enter Video/IGTV url: ").strip()
-    file_name = f"{datetime.now(tz=UTC).astimezone():%Y-%m-%d_%H:%M:%S}.mp4"
+    file_name = f"{datetime.now(tz=UTC).astimezone():%Y-%m-%d_%H-%M-%S}.mp4"
     with open(file_name, "wb") as fp:
         fp.write(download_video(url))
     print(f"Done. Video saved to disk as {file_name}.")

‎web_programming/nasa_data.py

Lines changed: 11 additions & 6 deletions

@@ -1,6 +1,11 @@
-import shutil
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "httpx",
+# ]
+# ///
 
-import requests
+import httpx
 
 
 def get_apod_data(api_key: str) -> dict:
@@ -9,17 +14,17 @@ def get_apod_data(api_key: str) -> dict:
     Get your API Key from: https://api.nasa.gov/
     """
     url = "https://api.nasa.gov/planetary/apod"
-    return requests.get(url, params={"api_key": api_key}, timeout=10).json()
+    return httpx.get(url, params={"api_key": api_key}, timeout=10).json()
 
 
 def save_apod(api_key: str, path: str = ".") -> dict:
     apod_data = get_apod_data(api_key)
     img_url = apod_data["url"]
     img_name = img_url.split("/")[-1]
-    response = requests.get(img_url, stream=True, timeout=10)
+    response = httpx.get(img_url, timeout=10)
 
     with open(f"{path}/{img_name}", "wb+") as img_file:
-        shutil.copyfileobj(response.raw, img_file)
+        img_file.write(response.content)
     del response
     return apod_data
 
@@ -29,7 +34,7 @@ def get_archive_data(query: str) -> dict:
     Get the data of a particular query from NASA archives
     """
     url = "https://images-api.nasa.gov/search"
-    return requests.get(url, params={"q": query}, timeout=10).json()
+    return httpx.get(url, params={"q": query}, timeout=10).json()
 
 
 if __name__ == "__main__":
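`save_apod` now reads the whole body via `response.content` instead of requests' `stream=True` plus `shutil.copyfileobj`. httpx also has a streaming API; a sketch of what a streamed download could look like, with an illustrative URL and file name (not part of this commit):

    import httpx

    img_url = "https://apod.nasa.gov/apod/image/example.jpg"  # illustrative URL

    # Stream the body to disk in chunks instead of holding it all in memory.
    with httpx.stream("GET", img_url, timeout=10, follow_redirects=True) as response:
        response.raise_for_status()
        with open("apod.jpg", "wb") as img_file:
            for chunk in response.iter_bytes():
                img_file.write(chunk)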
web_programming/open_google_results.py

Lines changed: 13 additions & 12 deletions

@@ -1,8 +1,17 @@
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "beautifulsoup4",
+#     "fake-useragent",
+#     "httpx",
+# ]
+# ///
+
 import webbrowser
 from sys import argv
 from urllib.parse import parse_qs, quote
 
-import requests
+import httpx
 from bs4 import BeautifulSoup
 from fake_useragent import UserAgent
 
@@ -13,26 +22,18 @@
 
 url = f"https://www.google.com/search?q={query}&num=100"
 
-res = requests.get(
+res = httpx.get(
     url,
     headers={"User-Agent": str(UserAgent().random)},
     timeout=10,
 )
 
 try:
-    link = (
-        BeautifulSoup(res.text, "html.parser")
-        .find("div", attrs={"class": "yuRUbf"})
-        .find("a")
-        .get("href")
-    )
+    link = BeautifulSoup(res.text, "html.parser").find("div").find("a").get("href")
 
 except AttributeError:
     link = parse_qs(
-        BeautifulSoup(res.text, "html.parser")
-        .find("div", attrs={"class": "kCrYT"})
-        .find("a")
-        .get("href")
+        BeautifulSoup(res.text, "html.parser").find("div").find("a").get("href")
     )["url"][0]
 
 webbrowser.open(link)

‎web_programming/random_anime_character.py

Lines changed: 12 additions & 3 deletions

@@ -1,6 +1,15 @@
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "beautifulsoup4",
+#     "fake-useragent",
+#     "httpx",
+# ]
+# ///
+
 import os
 
-import requests
+import httpx
 from bs4 import BeautifulSoup
 from fake_useragent import UserAgent
 
@@ -12,7 +21,7 @@ def save_image(image_url: str, image_title: str) -> None:
     """
     Saves the image of anime character
     """
-    image = requests.get(image_url, headers=headers, timeout=10)
+    image = httpx.get(image_url, headers=headers, timeout=10)
     with open(image_title, "wb") as file:
         file.write(image.content)
 
@@ -22,7 +31,7 @@ def random_anime_character() -> tuple[str, str, str]:
     Returns the Title, Description, and Image Title of a random anime character .
     """
     soup = BeautifulSoup(
-        requests.get(URL, headers=headers, timeout=10).text, "html.parser"
+        httpx.get(URL, headers=headers, timeout=10).text, "html.parser"
     )
     title = soup.find("meta", attrs={"property": "og:title"}).attrs["content"]
     image_url = soup.find("meta", attrs={"property": "og:image"}).attrs["content"]

‎web_programming/recaptcha_verification.py

Lines changed: 9 additions & 2 deletions

@@ -32,7 +32,14 @@
 recaptcha verification.
 """
 
-import requests
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "httpx",
+# ]
+# ///
+
+import httpx
 
 try:
     from django.contrib.auth import authenticate, login
@@ -56,7 +63,7 @@ def login_using_recaptcha(request):
     client_key = request.POST.get("g-recaptcha-response")
 
     # post recaptcha response to Google's recaptcha api
-    response = requests.post(
+    response = httpx.post(
         url, data={"secret": secret_key, "response": client_key}, timeout=10
     )
     # if the recaptcha api verified our keys

‎web_programming/reddit.py

Lines changed: 12 additions & 4 deletions

@@ -1,6 +1,13 @@
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "httpx",
+# ]
+# ///
+
 from __future__ import annotations
 
-import requests
+import httpx
 
 valid_terms = set(
     """approved_at_utc approved_by author_flair_background_color
@@ -28,13 +35,14 @@ def get_subreddit_data(
     if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
         msg = f"Invalid search term: {invalid_search_terms}"
         raise ValueError(msg)
-    response = requests.get(
-        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
+    response = httpx.get(
+        f"https://www.reddit.com/r/{subreddit}/{age}.json?limit={limit}",
         headers={"User-agent": "A random string"},
         timeout=10,
     )
+    response.raise_for_status()
     if response.status_code == 429:
-        raise requests.HTTPError(response=response)
+        raise httpx.HTTPError(response=response)
 
     data = response.json()
     if not wanted_data:

‎web_programming/search_books_by_isbn.py

Lines changed: 14 additions & 7 deletions

@@ -4,9 +4,16 @@
 ISBN: https://en.wikipedia.org/wiki/International_Standard_Book_Number
 """
 
-from json import JSONDecodeError  # Workaround for requests.exceptions.JSONDecodeError
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "httpx",
+# ]
+# ///
 
-import requests
+from json import JSONDecodeError
+
+import httpx
 
 
 def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
@@ -25,7 +32,9 @@ def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
     if new_olid.count("/") != 1:
         msg = f"{olid} is not a valid Open Library olid"
         raise ValueError(msg)
-    return requests.get(f"https://openlibrary.org/{new_olid}.json", timeout=10).json()
+    return httpx.get(
+        f"https://openlibrary.org/{new_olid}.json", timeout=10, follow_redirects=True
+    ).json()
 
 
 def summarize_book(ol_book_data: dict) -> dict:
@@ -36,16 +45,14 @@ def summarize_book(ol_book_data: dict) -> dict:
         "title": "Title",
         "publish_date": "Publish date",
         "authors": "Authors",
-        "number_of_pages": "Number of pages:",
-        "first_sentence": "First sentence",
+        "number_of_pages": "Number of pages",
         "isbn_10": "ISBN (10)",
         "isbn_13": "ISBN (13)",
     }
     data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
     data["Authors"] = [
         get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
     ]
-    data["First sentence"] = data["First sentence"]["value"]
     for key, value in data.items():
         if isinstance(value, list):
             data[key] = ", ".join(value)
@@ -71,5 +78,5 @@ def summarize_book(ol_book_data: dict) -> dict:
     try:
         book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
         print("\n".join(f"{key}: {value}" for key, value in book_summary.items()))
-    except JSONDecodeError:  # Workaround for requests.exceptions.RequestException:
+    except JSONDecodeError:
         print(f"Sorry, there are no results for ISBN: {isbn}.")

‎web_programming/slack_message.py

Lines changed: 9 additions & 2 deletions

@@ -1,11 +1,18 @@
 # Created by sarathkaul on 12/11/19
 
-import requests
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "httpx",
+# ]
+# ///
+
+import httpx
 
 
 def send_slack_message(message_body: str, slack_url: str) -> None:
     headers = {"Content-Type": "application/json"}
-    response = requests.post(
+    response = httpx.post(
         slack_url, json={"text": message_body}, headers=headers, timeout=10
     )
     if response.status_code != 200:

‎web_programming/test_fetch_github_info.py

Lines changed: 2 additions & 2 deletions

@@ -1,6 +1,6 @@
 import json
 
-import requests
+import httpx
 
 from .fetch_github_info import AUTHENTICATED_USER_ENDPOINT, fetch_github_info
 
@@ -21,7 +21,7 @@ def mock_response(*args, **kwargs):
         assert "Accept" in kwargs["headers"]
         return FakeResponse(b'{"login":"test","id":1}')
 
-    monkeypatch.setattr(requests, "get", mock_response)
+    monkeypatch.setattr(httpx, "get", mock_response)
     result = fetch_github_info("token")
     assert result["login"] == "test"
     assert result["id"] == 1

‎web_programming/world_covid19_stats.py

Lines changed: 19 additions & 7 deletions

@@ -5,19 +5,31 @@
 This data is being scrapped from 'https://www.worldometers.info/coronavirus/'.
 """
 
-import requests
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "beautifulsoup4",
+#     "httpx",
+# ]
+# ///
+
+import httpx
 from bs4 import BeautifulSoup
 
 
-def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
+def world_covid19_stats(
+    url: str = "https://www.worldometers.info/coronavirus/",
+) -> dict:
     """
     Return a dict of current worldwide COVID-19 statistics
     """
-    soup = BeautifulSoup(requests.get(url, timeout=10).text, "html.parser")
-    keys = soup.findAll("h1")
-    values = soup.findAll("div", {"class": "maincounter-number"})
-    keys += soup.findAll("span", {"class": "panel-title"})
-    values += soup.findAll("div", {"class": "number-table-main"})
+    soup = BeautifulSoup(
+        httpx.get(url, timeout=10, follow_redirects=True).text, "html.parser"
+    )
+    keys = soup.find_all("h1")
+    values = soup.find_all("div", {"class": "maincounter-number"})
+    keys += soup.find_all("span", {"class": "panel-title"})
+    values += soup.find_all("div", {"class": "number-table-main"})
     return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}