class Perceptron:
    """Single-layer perceptron binary classifier.

    Trained with the classic perceptron learning rule:
    ``w <- w + eta * X_with_bias.T @ (y - y_hat)``.

    NOTE(review): relies on module-level imports of ``np`` (numpy),
    ``logging`` and ``tqdm`` — confirm they exist at the top of the file.
    """

    def __init__(self, eta, epochs):
        """
        eta: learning rate (typically a small value between 0 and 1).
        epochs: number of full passes over the training data.
        """
        np.random.seed(42)  # fixed seed so every run starts from the same weights
        # small random initial weights: sized 3 = 2 input features + 1 bias term
        self.weights = np.random.randn(3) * 10 ** -4
        logging.info(f"initial weights before training: {self.weights}")
        self.eta = eta
        self.epochs = epochs

    def activationFunction(self, inputs, weights):
        """Step activation: 1 where inputs @ weights > 0, else 0."""
        z = np.dot(inputs, weights)
        return np.where(z > 0, 1, 0)

    def fit(self, X, y):
        """Train the perceptron on features X and 0/1 labels y.

        Stores X, y and the last epoch's error vector on the instance
        (``total_loss`` reads ``self.error`` afterwards).
        """
        self.X = X
        self.y = y
        # append a constant -1 column so the bias is learned as an ordinary weight
        X_with_bias = np.c_[self.X, -np.ones((len(self.X), 1))]
        logging.info(f"X with bias: {X_with_bias}")

        for epoch in tqdm(range(self.epochs), total=self.epochs, desc="training the model"):
            logging.info("--" * 10)
            logging.info(f"for epoch: {epoch}")
            logging.info("--" * 10)

            # forward propagation
            y_hat = self.activationFunction(X_with_bias, self.weights)
            logging.info(f"predicted value after forward pass: \n{y_hat}")

            self.error = self.y - y_hat
            logging.info(f"error : \n{self.error}")

            # backward propagation: perceptron weight-update rule
            self.weights = self.weights + self.eta * np.dot(X_with_bias.T, self.error)
            logging.info(f"updated weights after epoch : \n{epoch}/{self.epochs} : \n{self.weights}")
            logging.info("######" * 10)

    def predict(self, X):
        """Return 0/1 predictions for a feature matrix X."""
        X_with_bias = np.c_[X, -np.ones((len(X), 1))]
        return self.activationFunction(X_with_bias, self.weights)

    def total_loss(self):
        """Sum of the residual errors from the last training epoch."""
        total_loss = np.sum(self.error)
        logging.info(f"total loss : {total_loss}")
        return total_loss
PLOTS:
AND gate decision-boundary plot:
OR gate decision-boundary plot:
Commands used-
conda env list
mkdir utils
touch utils/__init__.py #to treat utils as a package
touch utils/model.py
touch utils/all_utils.py (keep all helper functions in the all_utils.py file)
from utils.model import Perceptron  # import the class by its name; "class" is a reserved keyword
touch requirements.txt
conda create -n sagar python=3.8 -y
conda activate sagar
pip freeze > requirements.txt
pip install -r requirements.txt
git add .&& git commit -m "docstring updated"&& git push origin main
git checkout <commit-id> (to go to a specific version)
#to see utils as a package, just type python in the terminal
import utils
utils.__version__ #check utils package version