1 Commits

Author SHA1 Message Date
Mara Karagianni
ec22d563e8 testing 2024-09-26 17:45:35 +02:00
8 changed files with 1 addition and 164 deletions

10
.gitignore vendored
View File

@@ -1,10 +0,0 @@
# Environments
venv/
.venv/
/pyvenv.cfg
.python-version
# Media
media/
downloaded_images
downloaded_videos

View File

@@ -11,5 +11,6 @@ test port ssh
` git pull `
## content
test

View File

@@ -1,16 +0,0 @@
## introduction
```
variables
list --> bisous.py programming.py
input --> bisous.py
print
for loop --> bisous.py
conditional statements if/else/elif --> bisous.py
break --> bisous.py
while loop --> missing.py
dictionary
enumerate
return --> missing.py
random --> programming.py
function --> missing.py
```

View File

@@ -1,19 +0,0 @@
# Initialiser les variables
queer = "mon amour"
bisous = ["ma biche", "mom bébé", "mon amour", "mon chéri.e"]
# Demander une saisie à l'utilisateur
amoureuxse = input("Entrez le nom de votre bien-aiméx : ")
# Boucler à travers la liste et imprimer le message correspondant
for bisou in bisous:
if bisou == queer:
print("bisou pour toi", bisou, amoureuxse)
elif amoureuxse == "python":
print("on dirait un.e geek")
break
else:
print(f":* :* {bisou}, {amoureuxse}")

View File

@@ -1,12 +0,0 @@
from time import sleep

# Loop control flag and the ever-growing intensifier.
love = True
how = "so"


def missing(so):
    """Print an "I miss you" declaration with the given intensifier *so*."""
    print(f"I miss you {so} much")


if __name__ == "__main__":
    # Guarded so importing this module does not start an infinite loop.
    # Repeats forever, adding one more " so" each round (Ctrl+C to stop).
    while love:
        missing(how)
        how += " so"
        sleep(0.2)

View File

@@ -1,25 +0,0 @@
"""
poem converted from bash programming.sh by Winnie Soon, modified from The House of Dust, 1967 Alison Knowles and James Tenney
"""
import random
import time
# listes for different elements
kisses = ["DEAREST", "SWEETHEART", "WORLD", "DARLING", "BABY", "LOVE", "MONKEY", "SUGAR", "LITTLE PRINCE"]
material = ["SAND", "DUST", "LEAVES", "PAPER", "TIN", "ROOTS", "BRICK", "STONE", "DISCARDED CLOTHING", "GLASS", "STEEL", "PLASTIC", "MUD", "BROKEN DISHES", "WOOD", "STRAW", "WEEDS", "FOREST"]
location = ["IN A GREEN, MOSSY TERRAIN", "IN AN OVERPOPULATED AREA", "BY THE SEA", "BY AN ABANDONED LAKE", "IN A DESERTED FACTORY", "IN DENSE WOODS", "IN JAPAN", "AMONG SMALL HILLS", "IN SOUTHERN FRANCE", "AMONG HIGH MOUNTAINS", "ON AN ISLAND", "IN A COLD, WINDY CLIMATE", "IN A PLACE WITH BOTH HEAVY RAIN AND BRIGHT SUN", "IN A DESERTED AIRPORT", "IN A HOT CLIMATE", "INSIDE A MOUNTAIN", "ON THE SEA", "IN MICHIGAN", "IN HEAVY JUNGLE UNDERGROWTH", "BY A RIVER", "AMONG OTHER HOUSES", "IN A DESERTED CHURCH", "IN A METROPOLIS", "UNDERWATER", "ON THE SCREEN", "ON THE ROAD"]
light_source = ["CANDLES", "ALL AVAILABLE LIGHTING", "ELECTRICITY", "NATURAL LIGHT", "LEDS", "MOON LIGHT", "THE SMALL TORCH"]
inhabitants = ["PEOPLE WHO SLEEP VERY LITTLE", "VEGETARIANS", "HORSES AND BIRDS", "PEOPLE SPEAKING MANY LANGUAGES WEARING LITTLE OR NO CLOTHING", "CHILDREN AND OLD PEOPLE", "VARIOUS BIRDS AND FISH", "LOVERS", "PEOPLE WHO ENJOY EATING TOGETHER", "PEOPLE WHO EAT A GREAT DEAL", "COLLECTORS OF ALL TYPES", "FRIENDS AND ENEMIES", "PEOPLE WHO SLEEP ALMOST ALL THE TIME", "VERY TALL PEOPLE", "AMERICAN INDIANS", "LITTLE BOYS", "PEOPLE FROM MANY WALKS OF LIFE", "FRIENDS", "FRENCH AND GERMAN SPEAKING PEOPLE", "FISHERMEN AND FAMILIES", "PEOPLE WHO LOVE TO READ", "CHEERFUL KIDS", "QUEER LOVERS", "NAUGHTY MONKEYS", "KIDDOS"]
# Infinite loop
while True:
print("HELLO", random.choice(kisses))
print(" A TERMINAL OF BLACK", random.choice(material))
print(" ", random.choice(location))
print(" PROGRAMMING", random.choice(light_source))
print(" KISSED BY", random.choice(inhabitants))
print(" ")
# Delay for 3.5 seconds
time.sleep(3.5)

View File

@@ -1,16 +0,0 @@
## Un script qui extrait des images depuis une URL donnée
Nous devons installer:
```
pip install requests beautifulsoup4 tldextract
```
Exécutez le script avec :
```
python get_images.py https://www.freepik.com/images
```
Remplacez l'URL par le lien que vous souhaitez extraire.
**Remarque:** Le scraping doit être effectué de manière éthique, en respectant les règles du fichier robots.txt et les conditions d'utilisation du site.

View File

@@ -1,66 +0,0 @@
import requests
import time
from bs4 import BeautifulSoup
from urllib.parse import urlparse
import os
import sys
import tldextract


def split_domain_or_subdomain_and_path(url):
    """Return "https://" + the full host of *url*, including any subdomain.

    Uses tldextract so multi-part suffixes (e.g. .co.uk) are handled.
    """
    extracted = tldextract.extract(url)
    # Build the full domain, including subdomain if present
    if extracted.subdomain:
        full_domain = f"{extracted.subdomain}.{extracted.domain}.{extracted.suffix}"
    else:
        full_domain = f"{extracted.domain}.{extracted.suffix}"
    return "https://" + full_domain


def main():
    """Download every <img> on the page given as argv[1] into downloaded_images/."""
    # URL of the webpage with images
    input_url = sys.argv[1]
    full_domain = split_domain_or_subdomain_and_path(input_url)
    print(f"Domain/Subdomain: {full_domain}")

    # Folder to save images
    save_folder = "downloaded_images"
    os.makedirs(save_folder, exist_ok=True)

    # Send GET request to the page; timeout so we never hang forever.
    response = requests.get(input_url, timeout=30)
    if response.status_code == 200:
        # Parse the HTML content with BeautifulSoup
        soup = BeautifulSoup(response.text, 'html.parser')
        # Find all image tags
        images = soup.find_all('img')
        # Loop through image tags
        for idx, img in enumerate(images):
            img_url = img.get('src')
            # Some <img> tags have no src attribute; skip them instead of
            # crashing on None.startswith().
            if not img_url:
                continue
            # Check if img_url is complete; if not, adjust it accordingly
            if not img_url.startswith("http"):
                img_url = full_domain + "/" + img_url
            try:
                # Send request to the image URL
                img_data = requests.get(img_url, timeout=30).content
                # Define file name and path (extension kept as .jpg for
                # compatibility with the original behaviour).
                img_name = os.path.join(save_folder, f"image_{idx}.jpg")
                # Write image data to file
                with open(img_name, 'wb') as handler:
                    handler.write(img_data)
                print(f"Downloaded {img_name}")
                # Be polite to the server between downloads.
                time.sleep(1)
            except Exception as e:
                print(f"Failed to download {img_url}. Error: {e}")
    else:
        print("Failed to retrieve the page.")


if __name__ == "__main__":
    # Guarded so importing this module does not read argv or hit the network.
    main()