Python - Examples

Reverse a Number Example

Reverse Number using While Loop

# Reverse the decimal digits of x: repeatedly pop the last digit (x % 10)
# and push it onto y.  Example: 12348 -> 84321.
x = 12348
y = 0
while x != 0:
    y = y * 10 + x % 10
    # Floor division keeps everything in int arithmetic; the original
    # int(x / 10) goes through a float and loses precision for large numbers.
    x //= 10
print(y)

How to find a missing number in an array

This solution does not require the array to be sorted.

# Find the one number missing from x, which should contain 1..8:
# the missing value is (sum of 1..8) minus (sum of the list).
x = [1, 2, 3, 5, 6, 7, 8]
s_x = sum(x)
s_all = sum(range(1, 9))
print(s_all - s_x)

Loop Through File Line By Line

# Print text.txt line by line; the `with` block closes the file automatically.
# NOTE(review): each line keeps its trailing newline, so print() produces a
# blank line between rows — use print(line, end='') to avoid that.
with open('text.txt', 'r') as f:
    for line in f:
        print(line)

Socket Connection Class Example

import socket

class SocketConnection:
    """Server-side TCP helper: binds to (ip, port) and accepts a single
    client connection, exposing send/receive on that connection.

    NOTE(review): the constructor blocks in accept() until a client
    connects, so merely constructing this object performs network I/O.
    """

    def __init__(self, port,ip):
        self.port = port
        self.ip = ip
        # IPv4 stream (TCP) listening socket.
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.hostname = self.ip  # alias of ip, used as the bind address
        self.socket.bind((self.hostname, self.port))
        self.socket.listen()
        # Blocks here until one client connects.
        self.socket_connection, self.socket_address = self.socket.accept()
        self.buff_size = 1024  # bytes requested per recv() call
        print("Client connected via socket connection.")

    def close(self):
        # Closes the listening socket only; the accepted connection
        # (self.socket_connection) is not closed here.
        self.socket.close()

    def receive_data(self):
        """Read from the accepted connection until a short read occurs.

        NOTE(review): a recv() returning fewer than buff_size bytes is
        treated as end-of-message, but TCP may deliver short reads
        mid-stream, so this can truncate data — confirm the peer's framing.
        """
        data = b''
        while True:
                partial_data = self.socket_connection.recv(self.buff_size)
                data = data + partial_data
                if len(partial_data) < self.buff_size:
                    # either 0 or end of data
                    break
        return data

    def send_data(self, data):
        # Send all bytes of `data` over the accepted connection.
        self.socket_connection.sendall(data)

Fork Example

The os.fork() method only works on UNIX-like platforms.

import os


def parent_child_process():
    """Fork the current process and print whether this copy is the
    parent or the child, together with its PID."""
    fork_result = os.fork()  # > 0 in the parent (child's PID), 0 in the child
    my_pid = os.getpid()
    role = "Parent process: " if fork_result > 0 else "Child process: "
    print(role, my_pid)


parent_child_process()

PIPE Example


from subprocess import PIPE,Popen

# Run `tasklist` (a Windows command) and print each output line as a list of
# whitespace-separated tokens.  The context manager waits for the process and
# closes its stdout pipe — the original Popen was never waited on or closed.
with Popen(['tasklist'], stdout=PIPE) as p:
    p.stdout.readline()  # discard the first line of output
    for line in p.stdout:
        tokens = line.decode('UTF-8').strip().split()
        print(tokens)

Rock Paper Scissors

# Two-player Rock-Paper-Scissors: 1 beats 2 (Rock > Scissors), 2 beats 3
# (Scissors > Paper), 3 beats 1 (Paper > Rock).  Play until the user enters 9.
beats = {'1': '2', '2': '3', '3': '1'}  # choice -> the choice it defeats
x = 0
while x != '9':
    player1 = input("player1: 1 - Rock 2 - Scissors 3- Paper: ")
    player2 = input("player2: 1 - Rock 2 - Scissors 3- Paper: ")
    if player1 not in beats or player2 not in beats:
        # The original only validated player1; a bad player2 entry was
        # silently scored as a player2 win.
        print("Invalid input")
    elif player1 == player2:
        print("It's a tie!")
    elif beats[player1] == player2:
        # Fixes the original's inconsistent messages ("player1 win!" and
        # the clearly wrong "print win!").
        print("player1 wins!")
    else:
        print("player2 wins!")
    x = input("1 - new game 9 - exit: ")
print("Game Over")

Transposing an n×n Matrix

# Transpose the square matrix `ma` in place by swapping each element above
# the main diagonal with its mirror below it.
ma = [[1, 2, 3, 4],
      [5, 6, 7, 8],
      [9, 10, 11, 12],
      [13, 14, 15, 16]]
size = len(ma)
for row in range(size):
    for col in range(row + 1, size):
        ma[row][col], ma[col][row] = ma[col][row], ma[row][col]
print(ma)

Find all Even Numbers in a List

Loops through the entire list a and adds only the even items to list b.

# Collect the even values of `a` into a new list `b`.
a = [1, 4, 9, 16, 25, 36, 49, 64, 81, 100]
b = list(filter(lambda value: value % 2 == 0, a))
print(b)

Python Multithreading: Incrementing a Shared Counter with Two Threads

import threading  # was missing in the original example (NameError at runtime)

_lock = threading.Lock()


def i_plus():
    """Add 1 to the global counter `i` one hundred times, thread-safely."""
    global i
    for _ in range(100):
        # `i = i + 1` is a non-atomic read-modify-write; without the lock
        # two threads can interleave and lose increments.
        with _lock:
            i = i + 1


i_plus_number = 2  # number of worker threads
i = 0
threads = [threading.Thread(target=i_plus) for _ in range(i_plus_number)]
for t in threads:
    t.start()
for t in threads:
    # The original printed `i` immediately after start(), racing the workers;
    # join() waits for both threads so the final count is reported.
    t.join()
print(i)

K-NN Example

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets

# Evaluate a k-NN classifier on the iris data for k = 1..20 under both
# 'uniform' and 'distance' vote weighting, then plot accuracy vs k.
# NOTE(review): the model is scored on its own training data, so
# weights='distance' trivially reaches 1.0 accuracy for every k.

# import some data to play with
iris = datasets.load_iris()

# we only take the first two features. We could avoid this
# slicing by using a two-dim dataset
X = iris.data[:, :2]
y = iris.target

# The original rebound the name `neighbors` (the sklearn module!) to this
# list after the loops; a distinct name avoids that fragile shadowing.
k_values = list(range(1, 21))

accuracy_uniform = []
accuracy_distance = []
for k in k_values:
    clf = neighbors.KNeighborsClassifier(n_neighbors=k, weights='uniform')
    clf.fit(X, y)
    score = clf.score(X, y)  # compute once instead of twice per iteration
    accuracy_uniform.append(score)
    print(score)

for k in k_values:
    clf = neighbors.KNeighborsClassifier(n_neighbors=k, weights='distance')
    clf.fit(X, y)
    accuracy_distance.append(clf.score(X, y))

# Plot both accuracy curves against the number of neighbors.
plt.title('Accuracy vs Neighbors')
plt.plot(k_values, accuracy_uniform, label='Uniform Accuracy')
plt.plot(k_values, accuracy_distance, label='Distance Accuracy')
plt.legend()
plt.xlabel('Number of neighbors')
plt.ylabel('Accuracy')
plt.show()

Deep Learning Example

from dataBase import DataBase
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, Flatten, Dropout
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping
import matplotlib.pyplot as plot


def main():
    """Train a CNN binary image classifier and plot its learning curves.

    Loads train/test/validation splits from the project DataBase, augments
    the training images, trains a three-stage Conv-Conv-Pool network with a
    sigmoid output, then plots per-epoch accuracy and loss.
    """
    d = DataBase()
    print("start")
    train_images, train_labels = d.load_data(dataType="train")
    test_images, test_labels = d.load_data(dataType="test")
    # NOTE(review): the validation split is loaded but never used below —
    # fit() validates on the *test* split instead.  Confirm which is intended.
    validation_images, validation_labels = d.load_data(dataType="validation")  # ask David

    # Image augmentation - in order to avoid overfitting:
    # each training image is modified slightly on every epoch.
    imageDataGenerator = ImageDataGenerator(
        rotation_range=15,  # randomly rotate images in the range (degrees, 0 to 180)
        width_shift_range=0.05,  # randomly shift images horizontally
        height_shift_range=0.05,  # randomly shift images vertically
        zoom_range=0.20,  # randomly zoom image
        horizontal_flip=True,  # randomly flip images horizontally
        vertical_flip=False,  # do not flip vertically
    )
    imageDataGenerator.fit(train_images)

    # Modelling
    imageSize = d.get_image_size()
    model = Sequential()  # plain stack of layers

    # Stage 1: two 3x3 conv layers (32 filters each) + 3x3 max-pool.
    # Conv2D Param = out_channels * (in_channels * kernel_h * kernel_w + 1);
    # input is a single-channel (grayscale) imageSize x imageSize image.
    model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu', input_shape=(imageSize, imageSize, 1), padding='same'))
    model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu', padding='same'))
    # MaxPooling2D downsamples by taking the max over each pool_size window;
    # strides default to pool_size.
    model.add(MaxPooling2D(pool_size=(3, 3)))

    # Stage 2: 64 filters.
    model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding='same'))
    model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(3, 3)))

    # Stage 3: 128 filters.
    model.add(Conv2D(filters=128, kernel_size=(3, 3), activation='relu', padding='same'))
    model.add(Conv2D(filters=128, kernel_size=(3, 3), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(3, 3)))

    # Flatten the feature maps into one vector for the fully connected head.
    model.add(Flatten())
    # Dense Param = units * input_size + units; output = relu(W.x + b).
    model.add(Dense(units=185, activation='relu'))
    # Dropout randomly zeroes inputs at `rate` during training to reduce
    # overfitting; surviving inputs are scaled up by 1/(1 - rate).
    model.add(Dropout(rate=0.35))
    # Single sigmoid unit -> value in (0, 1) for binary classification.
    model.add(Dense(units=1, activation='sigmoid'))

    # Configure the model for training.  `lr` is a deprecated alias of
    # `learning_rate`.  NOTE(review): `decay` is removed from newer Keras
    # optimizers — if this errors, move to a LearningRateSchedule.
    optimizer = Adam(learning_rate=0.00015, decay=0.85e-5)
    model.compile(optimizer=optimizer, loss="BinaryCrossentropy", metrics=['accuracy'])

    model.summary()

    # Stop training once val_accuracy has not improved for `patience` epochs.
    callback = EarlyStopping(monitor='val_accuracy', mode="max", patience=3)

    epochsNumber = 10

    # BUG FIX: the EarlyStopping callback was created but never passed to
    # fit(), so it had no effect — it is now wired in via `callbacks=`.
    history = model.fit(imageDataGenerator.flow(train_images, train_labels), validation_data=(test_images, test_labels), epochs=epochsNumber, callbacks=[callback])

    _plot_history(history)

    # test_loss, test_accuracy = model.evaluate(test_images, test_labels)
    # print("Accuracy of the model is - ", test_accuracy)
    # print("Loss of the model is - ", test_loss)

    print("end")


def _plot_history(history):
    """Plot training/validation accuracy (left) and loss (right) per epoch."""
    epochs = history.epoch
    figure, axes = plot.subplots(1, 2)
    train_accuracy = history.history['accuracy']
    train_loss = history.history['loss']
    validation_accuracy = history.history['val_accuracy']
    validation_loss = history.history['val_loss']

    axes[0].plot(epochs, train_accuracy, 'go-', label='Training Accuracy')
    axes[0].plot(epochs, validation_accuracy, 'ro-', label='Validation Accuracy')
    axes[0].set_title('Training & Validation Accuracy')
    axes[0].legend()
    axes[0].set_xlabel("Epochs")
    axes[0].set_ylabel("Accuracy")

    axes[1].plot(epochs, train_loss, 'g-o', label='Training Loss')
    axes[1].plot(epochs, validation_loss, 'r-o', label='Validation Loss')
    axes[1].set_title('Loss')
    axes[1].legend()
    axes[1].set_xlabel("Epochs")
    axes[1].set_ylabel("Training & Validation Loss")
    plot.show()


if __name__ == "__main__":
    main()