[1] B. Liu, "Sentiment Analysis and Opinion Mining," Synthesis Lectures on Human-Centered Informatics, vol. 5, no. 1, pp. 1-167, 2012.
[2] FIFA, "FIFA Women's Football Strategy 2020-2026," 2021. [Online].
Available: https://www.fifa.com. [Accessed: Oct. 2023].
[3] Nielsen Sports, "The Future of Football in Southeast Asia," 2020. [Online].
Available: https://www.nielsen.com. [Accessed: Oct. 2023].
[4] M. F. Y. Herjanto and C. Carudin, “Analisis Sentimen Ulasan Pengguna Aplikasi Sirekap Pada Play Store Menggunakan Algoritma Random Forest Classifer,” J. Inform. dan Tek. Elektro Terap., vol. 12, no. 2, pp. 1204–1210, 2024, doi: 10.23960/jitet.v12i2.4192.
[5] M. R. Fajriansyah and Siswanto, “Analisis Sentimen Pengguna Twitter Terhadap Partai Politik Pendukung Calon Gubernur Di Jakarta Menggunakan Algoritma C4.5 Decision Tree Learning,” Skanika, vol. 1, no. 2, pp. 697–703, 2018, [Online]. Available:
https://jom.fti.budiluhur.ac.id/index.php/SKANIKA/article/view/278
[6] Cindy Caterine Yolanda, Syafriandi Syafriandi, Yenni Kurniawati, and Dina Fitria, “Sentiment Analysis of DANA Application Reviews on Google Play Store Using Naïve Bayes Classifier Algorithm Based on Information Gain,”
UNP J. Stat. Data Sci., vol. 2, no. 1, pp. 48–55, 2024, doi:
10.24036/ujsds/vol2-iss1/147.
[7] T. B. Rohman, D. Dwi Purwanto, and J. Santoso, “Sentiment Analysis Terhadap Review Rumah Makan di Surabaya Memanfaatkan Algoritma Random Forest,” Fak. Sist. Inf., vol. 60284, 2018.
[8] R. L. Nurdiansyah and K. E. Dewi, “Pengaruh Information Gain Dan Normalisasi Kata Pada Analisis Sentimen Berbasis Aspek,” KOMPUTA J.
Ilm. Komput. dan Inform., vol. 12, no. 2, 2023.
[9] W. Parasati, F. A. Bachtiar, and N. Y. Setiawan, "Analisis Sentimen Berbasis Aspek pada Ulasan Pelanggan Restoran Bakso President Malang dengan Metode Naïve Bayes Classifier," J. Teknol. Inf. dan Ilmu Komputer, vol.
2020, pp. 1-10, 2020. Available:
http://j-ptiik.ub.ac.id/index.php/j-ptiik/article/view/7134.
[10] H. A. R. Harpizon, R. Kurniawan, and others, "Analisis Sentimen Komentar Di YouTube Tentang Ceramah Ustadz Abdul Somad Menggunakan Algoritma Naïve Bayes," Repository UIN Suska, 2022. Available:
https://repository.uin-suska.ac.id/59746/.
[11] L. Annisa and A. D. Kalifia, "Analisis Teknik TF-IDF Dalam Identifikasi Faktor-Faktor Penyebab Depresi Pada Individu," Gudang Jurnal Multidisiplin Ilmu, vol. 2024, pp. 1-12, 2024. Available:
https://gudangjurnal.com/index.php/gjmi/article/view/249.
[12] M. A. Maulana, "Politik, Olahraga, dan Islam Studi Kasus Pembatalan RI Menjadi Tuan Rumah Piala Dunia U-20 2023," Islamic Education, vol. 2023,
pp. 1-10, 2023. Available:
http://maryamsejahtera.com/index.php/Education/article/view/192.
[13] R. A. Rakhman, "Pengaruh Iklan Squeeze Frame pada Pertandingan Kualifikasi Piala Dunia Timnas Indonesia Terhadap Respon Afektif Penonton," Jurnal Komunikasi, Sosial dan Ilmu Politik, vol. 2025, pp. 1-15,
2025. Available:
https://jurnal.researchideas.org/index.php/retorika/article/view/197.
[14] A. A. Firdaus, A. I. Hadiana, and others, "Klasifikasi Sentimen pada Aplikasi Shopee Menggunakan Fitur Bag of Word dan Algoritma Random Forest,"
Research: Journal of..., vol. 2024, pp. 1-12, 2024. Available:
https://www.jurnal.ranahresearch.com/index.php/R2J/article/view/994.
LAMPIRAN
# 1. Import the dataset.
import pandas as pd

pd.options.mode.chained_assignment = None  # silence SettingWithCopyWarning

gabungan_sentimen = pd.read_csv('gabungan_sentimen.csv')
# Drop columns that will be recomputed by the preprocessing pipeline below.
# NOTE(review): 'is_kondisi_lapangan' was split across lines in the original
# listing ('is_kondi si_lapangan'); rejoined here — confirm against the CSV.
gabungan_sentimen = gabungan_sentimen.drop(
    columns=['stopwords_removal', 'is_menyerang', 'is_kondisi_lapangan', 'is_bertahan']
)
gabungan_sentimen
# 2. Text preprocessing — imports and the informal->formal (slang) dictionary.
import re

from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from Sastrawi.Stemmer.StemmerFactory import StemmerFactory

# Raw GitHub URL of an Indonesian informal->formal word dictionary.
# (Rejoined: the URL literal was wrapped across two lines in the original listing.)
url = (
    "https://raw.githubusercontent.com/panggi/pujangga/master/resource/"
    "formalization/formalizationDict.txt"
)

# Read the tab-separated dictionary straight from the URL.
df_slang_word = pd.read_csv(url, sep="\t", header=None, names=["Informal", "Formal"])

# Convert the DataFrame to a plain dict for O(1) slang lookups.
slang_dict = dict(zip(df_slang_word["Informal"], df_slang_word["Formal"]))
# Convert text to lowercase.
def case_folding(text):
    """Return *text* lower-cased."""
    return text.lower()
# Clean raw text: normalise punctuation spacing, then strip digits,
# punctuation and redundant whitespace.
def cleaning(text):
    """Return *text* with digits/punctuation removed and whitespace collapsed."""
    # Ensure a space follows every comma and period so joined words separate.
    text = re.sub(r',', ', ', text)
    text = re.sub(r'\.', '. ', text)
    # Remove digits.
    text = re.sub(r'\d+', '', text)
    # Remove punctuation and symbols (keep word characters and whitespace).
    text = re.sub(r'[^\w\s]', '', text)
    # Collapse runs of whitespace and trim the ends.
    return re.sub(r'\s+', ' ', text).strip()
# Tokenise text into a list of word tokens.
def tokenizing(text):
    """Split *text* into word tokens using NLTK's word_tokenize."""
    return word_tokenize(text)
# Replace informal (slang) tokens with their formal equivalents.
def slang_words(tokens):
    """Map each token through slang_dict; unknown tokens pass through unchanged."""
    return [slang_dict.get(token, token) for token in tokens]
# Build the Sastrawi stemmer once at module level: StemmerFactory loads its
# dictionary on creation, so constructing it on every call (as the original
# listing did) repeats that work for each row of the dataset.
_stemmer = StemmerFactory().create_stemmer()

# Reduce each token to its Indonesian root word.
def stemming(tokens):
    """Stem every token with the shared Sastrawi stemmer."""
    return [_stemmer.stem(token) for token in tokens]
# Join each negation word onto the token that follows it (e.g. "tidak bagus"
# -> "tidak_bagus") so negated phrases survive stopword removal as one token.
def convert_negation(tokens):
    """Return a new token list where the successor of every negation word is
    prefixed with that word.

    Fix: the original mutated the caller's list in place; this version works
    on a copy, so the input is left unmodified. The returned value is
    identical to the original implementation's (including its chaining
    behaviour when negation words are adjacent).
    """
    negation_words = ('tidak', 'bukan', 'tak', 'jangan', 'belum', 'tiada',
                      'takut', 'kurang')
    result = list(tokens)  # do not mutate the caller's list
    for i in range(len(result) - 1):
        if result[i] in negation_words:
            result[i + 1] = f"{result[i]}_{result[i + 1]}"
    return result
# Remove Indonesian stopwords plus a few colloquial fillers.
def stopwords_removal(tokens):
    """Return *tokens* with NLTK Indonesian stopwords and extra fillers removed."""
    stop_words = set(stopwords.words('indonesian'))
    # Colloquial fillers that are not in the NLTK Indonesian list.
    additional_stopwords = ["dong", "nih", "sih", "aja", "deh", "hihi", "nya"]
    stop_words.update(additional_stopwords)
    return [token for token in tokens if token not in stop_words]
# 3. Apply the preprocessing pipeline to the dataset, one column per stage.
# NOTE(review): the right-hand side of the first assignment was lost in the
# original listing; 'full_text' is assumed to be the raw-text column — confirm
# against the CSV schema.
gabungan_sentimen['case_folding'] = gabungan_sentimen['full_text'].apply(case_folding)
gabungan_sentimen['cleaning'] = gabungan_sentimen['case_folding'].apply(cleaning)
gabungan_sentimen['tokenizing'] = gabungan_sentimen['cleaning'].apply(tokenizing)
gabungan_sentimen['slang_words'] = gabungan_sentimen['tokenizing'].apply(slang_words)
gabungan_sentimen['stemming'] = gabungan_sentimen['slang_words'].apply(stemming)
gabungan_sentimen['convert_negation'] = gabungan_sentimen['stemming'].apply(convert_negation)
gabungan_sentimen['stopwords_removal'] = gabungan_sentimen['convert_negation'].apply(stopwords_removal)
gabungan_sentimen
4. Prediksi Sentimen untuk Indonesia (HOME dan AWAY)
id = gabungan_sentimen[['stopwords_removal','pertandingan', 'negara']]
id = id[id['negara'] == 'id']
id = id.drop(columns=['negara']).reset_index(drop=True)
id
# a. Attack ("menyerang") prediction — HOME matches.
id_menyerang_home = id[id['pertandingan'] == 'HOME']
id_menyerang_home

import pickle

# Load the trained Random Forest model and its TF-IDF vectorizer.
with open('model/random_forest_model_id_menyerang.pkl', 'rb') as file:
    loaded_model_id_menyerang = pickle.load(file)
with open('model/tfidf_vectorizer_model_id_menyerang.pkl', 'rb') as file:
    loaded_vectorizer_id_menyerang = pickle.load(file)
print("Model dan vectorizer berhasil dimuat dari file.")

# Re-join the token lists into whitespace-separated strings, vectorize, predict.
test_text_vectorized = loaded_vectorizer_id_menyerang.transform(
    id_menyerang_home['stopwords_removal'].apply(lambda x: ' '.join(x))
)
predictions = loaded_model_id_menyerang.predict(test_text_vectorized)
id_menyerang_home.loc[:, 'menyerang'] = predictions
id_menyerang_home['menyerang'].value_counts()

import matplotlib.pyplot as plt
import seaborn as sns

# Bar chart of the predicted sentiment distribution.
counts = id_menyerang_home['menyerang'].value_counts()
label_mapping = {-1: "Negatif", 0: "Netral", 1: "Positif"}
counts.index = counts.index.map(label_mapping)

plt.figure(figsize=(10, 6))
# NOTE(review): the palette is positional — colors follow value_counts()
# frequency order, not a fixed sentiment->color mapping; confirm intent.
ax = sns.barplot(x=counts.index, y=counts.values, palette=['gray', 'green', 'red'])
# Annotate each bar with its count.
for i, value in enumerate(counts.values):
    ax.text(i, value + 40, str(value), ha='center', va='bottom',
            fontsize=12, fontweight='bold', color='black')
plt.xlabel("Kategori Sentimen")
plt.ylabel("Jumlah")
plt.title("Distribusi Sentimen Indonesia Menyerang Home")
plt.xticks(rotation=0)
plt.show()
# b. Attack ("menyerang") prediction — AWAY matches.
id_menyerang_away = id[id['pertandingan'] == 'AWAY']
id_menyerang_away

import pickle

# Load the trained Random Forest model and its TF-IDF vectorizer.
with open('model/random_forest_model_id_menyerang.pkl', 'rb') as file:
    loaded_model_id_menyerang = pickle.load(file)
with open('model/tfidf_vectorizer_model_id_menyerang.pkl', 'rb') as file:
    loaded_vectorizer_id_menyerang = pickle.load(file)
print("Model dan vectorizer berhasil dimuat dari file.")

# Re-join the token lists into whitespace-separated strings, vectorize, predict.
test_text_vectorized = loaded_vectorizer_id_menyerang.transform(
    id_menyerang_away['stopwords_removal'].apply(lambda x: ' '.join(x))
)
predictions = loaded_model_id_menyerang.predict(test_text_vectorized)
id_menyerang_away.loc[:, 'menyerang'] = predictions
id_menyerang_away['menyerang'].value_counts()

import matplotlib.pyplot as plt
import seaborn as sns

# Bar chart of the predicted sentiment distribution.
counts = id_menyerang_away['menyerang'].value_counts()
label_mapping = {-1: "Negatif", 0: "Netral", 1: "Positif"}
counts.index = counts.index.map(label_mapping)

plt.figure(figsize=(10, 6))
# NOTE(review): positional palette — colors follow value_counts() frequency
# order, not a fixed sentiment->color mapping; confirm intent.
ax = sns.barplot(x=counts.index, y=counts.values, palette=['gray', 'green', 'red'])
# Annotate each bar with its count.
for i, value in enumerate(counts.values):
    ax.text(i, value + 45, str(value), ha='center', va='bottom',
            fontsize=12, fontweight='bold', color='black')
plt.xlabel("Kategori Sentimen")
plt.ylabel("Jumlah")
plt.title("Distribusi Sentimen Indonesia Menyerang Away")
plt.xticks(rotation=0)
plt.show()
# c. Defense ("bertahan") prediction — HOME matches.
id_bertahan_home = id[id['pertandingan'] == 'HOME']
id_bertahan_home

import pickle

# Load the trained Random Forest model and its TF-IDF vectorizer.
with open('model/random_forest_model_id_bertahan.pkl', 'rb') as file:
    loaded_model_id_bertahan = pickle.load(file)
with open('model/tfidf_vectorizer_model_id_bertahan.pkl', 'rb') as file:
    loaded_vectorizer_id_bertahan = pickle.load(file)
print("Model dan vectorizer berhasil dimuat dari file.")

# Re-join the token lists into whitespace-separated strings, vectorize, predict.
test_text_vectorized = loaded_vectorizer_id_bertahan.transform(
    id_bertahan_home['stopwords_removal'].apply(lambda x: ' '.join(x))
)
predictions = loaded_model_id_bertahan.predict(test_text_vectorized)
id_bertahan_home.loc[:, 'bertahan'] = predictions
id_bertahan_home['bertahan'].value_counts()

import matplotlib.pyplot as plt
import seaborn as sns

# Bar chart of the predicted sentiment distribution.
counts = id_bertahan_home['bertahan'].value_counts()
label_mapping = {-1: "Negatif", 0: "Netral", 1: "Positif"}
counts.index = counts.index.map(label_mapping)

plt.figure(figsize=(10, 6))
# NOTE(review): positional palette — colors follow value_counts() frequency
# order, not a fixed sentiment->color mapping; confirm intent.
ax = sns.barplot(x=counts.index, y=counts.values, palette=['gray', 'green', 'red'])
# Annotate each bar with its count.
for i, value in enumerate(counts.values):
    ax.text(i, value + 40, str(value), ha='center', va='bottom',
            fontsize=12, fontweight='bold', color='black')
plt.xlabel("Kategori Sentimen")
plt.ylabel("Jumlah")
plt.title("Distribusi Sentimen Indonesia Bertahan Home")
plt.xticks(rotation=0)
plt.show()
# d. Defense ("bertahan") prediction — AWAY matches.
id_bertahan_away = id[id['pertandingan'] == 'AWAY']
id_bertahan_away

import pickle

# Load the trained Random Forest model and its TF-IDF vectorizer.
with open('model/random_forest_model_id_bertahan.pkl', 'rb') as file:
    loaded_model_id_bertahan = pickle.load(file)
with open('model/tfidf_vectorizer_model_id_bertahan.pkl', 'rb') as file:
    loaded_vectorizer_id_bertahan = pickle.load(file)
print("Model dan vectorizer berhasil dimuat dari file.")

# Re-join the token lists into whitespace-separated strings, vectorize, predict.
test_text_vectorized = loaded_vectorizer_id_bertahan.transform(
    id_bertahan_away['stopwords_removal'].apply(lambda x: ' '.join(x))
)
predictions = loaded_model_id_bertahan.predict(test_text_vectorized)
id_bertahan_away.loc[:, 'bertahan'] = predictions
id_bertahan_away['bertahan'].value_counts()

import matplotlib.pyplot as plt
import seaborn as sns

# Bar chart of the predicted sentiment distribution.
counts = id_bertahan_away['bertahan'].value_counts()
label_mapping = {-1: "Negatif", 0: "Netral", 1: "Positif"}
counts.index = counts.index.map(label_mapping)

plt.figure(figsize=(10, 6))
# NOTE(review): positional palette (order differs from the other sections) —
# colors follow value_counts() frequency order; confirm intent.
ax = sns.barplot(x=counts.index, y=counts.values, palette=['green', 'gray', 'red'])
# Annotate each bar with its count.
for i, value in enumerate(counts.values):
    ax.text(i, value + 40, str(value), ha='center', va='bottom',
            fontsize=12, fontweight='bold', color='black')
plt.xlabel("Kategori Sentimen")
plt.ylabel("Jumlah")
plt.title("Distribusi Sentimen Indonesia Bertahan Away")
plt.xticks(rotation=0)
plt.show()
# 5. Sentiment prediction for Saudi Arabia (HOME and AWAY).
ar = gabungan_sentimen[['stopwords_removal', 'pertandingan', 'negara']]
ar = ar[ar['negara'] == 'ar']
ar = ar.drop(columns=['negara']).reset_index(drop=True)
ar
# a. Attack ("menyerang") prediction — HOME matches.
ar_menyerang_home = ar[ar['pertandingan'] == 'HOME']
ar_menyerang_home

import pickle

# Load the trained Random Forest model and its TF-IDF vectorizer.
with open('model/random_forest_model_ar_menyerang.pkl', 'rb') as file:
    loaded_model_ar_menyerang = pickle.load(file)
with open('model/tfidf_vectorizer_model_ar_menyerang.pkl', 'rb') as file:
    loaded_vectorizer_ar_menyerang = pickle.load(file)
print("Model dan vectorizer berhasil dimuat dari file.")

# Re-join the token lists into whitespace-separated strings, vectorize, predict.
test_text_vectorized = loaded_vectorizer_ar_menyerang.transform(
    ar_menyerang_home['stopwords_removal'].apply(lambda x: ' '.join(x))
)
predictions = loaded_model_ar_menyerang.predict(test_text_vectorized)
ar_menyerang_home.loc[:, 'menyerang'] = predictions
ar_menyerang_home['menyerang'].value_counts()

import matplotlib.pyplot as plt
import seaborn as sns

# Bar chart of the predicted sentiment distribution.
counts = ar_menyerang_home['menyerang'].value_counts()
label_mapping = {-1: "Negatif", 0: "Netral", 1: "Positif"}
counts.index = counts.index.map(label_mapping)

plt.figure(figsize=(10, 6))
# NOTE(review): positional palette — colors follow value_counts() frequency
# order, not a fixed sentiment->color mapping; confirm intent.
ax = sns.barplot(x=counts.index, y=counts.values, palette=['red', 'gray', 'green'])
# Annotate each bar with its count.
for i, value in enumerate(counts.values):
    ax.text(i, value, str(value), ha='center', va='bottom',
            fontsize=12, fontweight='bold', color='black')
plt.xlabel("Kategori Sentimen")
plt.ylabel("Jumlah")
plt.title("Distribusi Sentimen Arab Saudi Menyerang Home")
plt.xticks(rotation=0)
plt.show()
# b. Attack ("menyerang") prediction — AWAY matches.
ar_menyerang_away = ar[ar['pertandingan'] == 'AWAY']
ar_menyerang_away

import pickle

# Load the trained Random Forest model and its TF-IDF vectorizer.
with open('model/random_forest_model_ar_menyerang.pkl', 'rb') as file:
    loaded_model_ar_menyerang = pickle.load(file)
with open('model/tfidf_vectorizer_model_ar_menyerang.pkl', 'rb') as file:
    loaded_vectorizer_ar_menyerang = pickle.load(file)
print("Model dan vectorizer berhasil dimuat dari file.")

# Re-join the token lists into whitespace-separated strings, vectorize, predict.
test_text_vectorized = loaded_vectorizer_ar_menyerang.transform(
    ar_menyerang_away['stopwords_removal'].apply(lambda x: ' '.join(x))
)
predictions = loaded_model_ar_menyerang.predict(test_text_vectorized)
ar_menyerang_away.loc[:, 'menyerang'] = predictions
ar_menyerang_away['menyerang'].value_counts()

import matplotlib.pyplot as plt
import seaborn as sns

# Bar chart of the predicted sentiment distribution.
counts = ar_menyerang_away['menyerang'].value_counts()
label_mapping = {-1: "Negatif", 0: "Netral", 1: "Positif"}
counts.index = counts.index.map(label_mapping)

plt.figure(figsize=(10, 6))
# NOTE(review): positional palette — colors follow value_counts() frequency
# order, not a fixed sentiment->color mapping; confirm intent.
ax = sns.barplot(x=counts.index, y=counts.values, palette=['gray', 'red', 'green'])
# Annotate each bar with its count.
for i, value in enumerate(counts.values):
    ax.text(i, value, str(value), ha='center', va='bottom',
            fontsize=12, fontweight='bold', color='black')
plt.xlabel("Kategori Sentimen")
plt.ylabel("Jumlah")
plt.title("Distribusi Sentimen Arab Saudi Menyerang Away")
plt.xticks(rotation=0)
plt.show()
# c. Defense ("bertahan") prediction — HOME matches.
ar_bertahan_home = ar[ar['pertandingan'] == 'HOME']
ar_bertahan_home

import pickle

# Load the trained Random Forest model and its TF-IDF vectorizer.
with open('model/random_forest_model_ar_bertahan.pkl', 'rb') as file:
    loaded_model_ar_bertahan = pickle.load(file)
with open('model/tfidf_vectorizer_model_ar_bertahan.pkl', 'rb') as file:
    loaded_vectorizer_ar_bertahan = pickle.load(file)
print("Model dan vectorizer berhasil dimuat dari file.")

# Re-join the token lists into whitespace-separated strings, vectorize, predict.
test_text_vectorized = loaded_vectorizer_ar_bertahan.transform(
    ar_bertahan_home['stopwords_removal'].apply(lambda x: ' '.join(x))
)
predictions = loaded_model_ar_bertahan.predict(test_text_vectorized)
ar_bertahan_home.loc[:, 'bertahan'] = predictions
ar_bertahan_home['bertahan'].value_counts()

import matplotlib.pyplot as plt
import seaborn as sns

# Bar chart of the predicted sentiment distribution.
counts = ar_bertahan_home['bertahan'].value_counts()
label_mapping = {-1: "Negatif", 0: "Netral", 1: "Positif"}
counts.index = counts.index.map(label_mapping)

plt.figure(figsize=(10, 6))
# NOTE(review): positional palette — colors follow value_counts() frequency
# order, not a fixed sentiment->color mapping; confirm intent.
ax = sns.barplot(x=counts.index, y=counts.values, palette=['gray', 'red', 'green'])
# Annotate each bar with its count.
for i, value in enumerate(counts.values):
    ax.text(i, value, str(value), ha='center', va='bottom',
            fontsize=12, fontweight='bold', color='black')
plt.xlabel("Kategori Sentimen")
plt.ylabel("Jumlah")
plt.title("Distribusi Sentimen Arab Saudi Bertahan Home")
plt.xticks(rotation=0)
plt.show()
# d. Defense ("bertahan") prediction — AWAY matches.
ar_bertahan_away = ar[ar['pertandingan'] == 'AWAY']
ar_bertahan_away

import pickle

# Load the trained Random Forest model and its TF-IDF vectorizer.
with open('model/random_forest_model_ar_bertahan.pkl', 'rb') as file:
    loaded_model_ar_bertahan = pickle.load(file)
with open('model/tfidf_vectorizer_model_ar_bertahan.pkl', 'rb') as file:
    loaded_vectorizer_ar_bertahan = pickle.load(file)
print("Model dan vectorizer berhasil dimuat dari file.")

# Re-join the token lists into whitespace-separated strings, vectorize, predict.
test_text_vectorized = loaded_vectorizer_ar_bertahan.transform(
    ar_bertahan_away['stopwords_removal'].apply(lambda x: ' '.join(x))
)
predictions = loaded_model_ar_bertahan.predict(test_text_vectorized)
ar_bertahan_away.loc[:, 'bertahan'] = predictions
ar_bertahan_away['bertahan'].value_counts()

import matplotlib.pyplot as plt
import seaborn as sns

# Bar chart of the predicted sentiment distribution.
counts = ar_bertahan_away['bertahan'].value_counts()
label_mapping = {-1: "Negatif", 0: "Netral", 1: "Positif"}
counts.index = counts.index.map(label_mapping)

plt.figure(figsize=(10, 6))
# NOTE(review): positional palette — colors follow value_counts() frequency
# order, not a fixed sentiment->color mapping; confirm intent.
ax = sns.barplot(x=counts.index, y=counts.values, palette=['gray', 'red', 'green'])
# Annotate each bar with its count.
for i, value in enumerate(counts.values):
    ax.text(i, value, str(value), ha='center', va='bottom',
            fontsize=12, fontweight='bold', color='black')
plt.xlabel("Kategori Sentimen")
plt.ylabel("Jumlah")
plt.title("Distribusi Sentimen Arab Saudi Bertahan Away")
plt.xticks(rotation=0)
plt.show()
# 6. Sentiment prediction for pitch condition ("kondisi lapangan"), HOME and AWAY.
# a. Pitch-condition prediction — HOME matches.
home = gabungan_sentimen[['stopwords_removal', 'pertandingan']]
home = home[home['pertandingan'] == 'HOME']
home

import pickle

# Load the trained Random Forest model and its TF-IDF vectorizer.
with open('model/random_forest_model_home.pkl', 'rb') as file:
    loaded_model_home = pickle.load(file)
with open('model/tfidf_vectorizer_model_home.pkl', 'rb') as file:
    loaded_vectorizer_home = pickle.load(file)
print("Model dan vectorizer berhasil dimuat dari file.")

# Re-join the token lists into whitespace-separated strings, vectorize, predict.
test_text_vectorized = loaded_vectorizer_home.transform(
    home['stopwords_removal'].apply(lambda x: ' '.join(x))
)
predictions = loaded_model_home.predict(test_text_vectorized)
home.loc[:, 'kondisi_lapangan'] = predictions
home['kondisi_lapangan'].value_counts()

import matplotlib.pyplot as plt
import seaborn as sns

# Bar chart of the predicted sentiment distribution.
counts = home['kondisi_lapangan'].value_counts()
label_mapping = {-1: "Negatif", 0: "Netral", 1: "Positif"}
counts.index = counts.index.map(label_mapping)

plt.figure(figsize=(10, 6))
# NOTE(review): positional palette — colors follow value_counts() frequency
# order, not a fixed sentiment->color mapping; confirm intent.
ax = sns.barplot(x=counts.index, y=counts.values, palette=['gray', 'green', 'red'])
# Annotate each bar with its count.
for i, value in enumerate(counts.values):
    ax.text(i, value, str(value), ha='center', va='bottom',
            fontsize=12, fontweight='bold', color='black')
plt.xlabel("Kategori Sentimen")
plt.ylabel("Jumlah")
plt.title("Distribusi Sentimen Kondisi Lapangan Home")
plt.xticks(rotation=0)
plt.show()
# b. Pitch-condition prediction — AWAY matches (subset only; the model-loading
# code continues beyond this listing).
away = gabungan_sentimen[['stopwords_removal', 'pertandingan']]
away = away[away['pertandingan'] == 'AWAY']
away
import pickle
# Memuat model dari file pickle
with open('model/random_forest_model_away.pkl', 'rb') as file:
loaded_model_away = pickle.load(file)
# Memuat vectorizer dari file pickle